viosapic_set_irq(d, callback_irq, 0);
}
}
- hvm_dirq_assist(v);
}
rmb();
/* Crank the handle on interrupt state. */
pt_update_irq(v);
- hvm_dirq_assist(v);
do {
intack = hvm_vcpu_has_pending_irq(v);
/* Crank the handle on interrupt state. */
pt_update_irq(v);
- hvm_dirq_assist(v);
do {
intack = hvm_vcpu_has_pending_irq(v);
softirq_handlers[nr] = handler;
}
+/*
+ * Raise softirq 'nr' on every CPU in 'mask'. The mask is taken by value:
+ * the local copy is pruned below, so the caller's mask is not modified.
+ */
+void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
+{
+    int cpu;
+
+    /*
+     * Drop CPUs whose pending bit was already set (test_and_set_bit
+     * returned non-zero): they already have an event check outstanding,
+     * so no new IPI is needed for them.
+     */
+    for_each_cpu_mask(cpu, mask)
+        if ( test_and_set_bit(nr, &softirq_pending(cpu)) )
+            cpu_clear(cpu, mask);
+
+    smp_send_event_check_mask(&mask);
+}
+
+/*
+ * Raise softirq 'nr' on a single CPU, sending an event-check IPI only
+ * when the bit was not already pending (avoids redundant IPIs).
+ */
+void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
+{
+    if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+        smp_send_event_check_cpu(cpu);
+}
+
+/*
+ * Raise softirq 'nr' on the local CPU. No IPI is sent to self;
+ * NOTE(review): presumably pending softirqs are polled on the exit
+ * path of the current context -- confirm against do_softirq() callers.
+ */
+void raise_softirq(unsigned int nr)
+{
+    set_bit(nr, &softirq_pending(smp_processor_id()));
+}
+
static LIST_HEAD(tasklet_list);
static DEFINE_SPINLOCK(tasklet_lock);
#include <asm/hvm/iommu.h>
#include <xen/hvm/irq.h>
+/* Forward declaration: tasklet handler, defined later in this file. */
+static void hvm_dirq_assist(unsigned long _d);
+
static int pt_irq_need_timer(uint32_t flags)
{
return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
return -ENOMEM;
}
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+    /* Tasklet invokes hvm_dirq_assist with the domain pointer as data. */
+    tasklet_init(&hvm_irq_dpci->dirq_tasklet,
+                 hvm_dirq_assist, (unsigned long)d);
hvm_irq_dpci->mirq = xmalloc_array(struct hvm_mirq_dpci_mapping,
d->nr_pirqs);
hvm_irq_dpci->dirq_mask = xmalloc_array(unsigned long,
!test_bit(mirq, dpci->mapping))
return 0;
- /*
- * Set a timer here to avoid situations where the IRQ line is shared, and
- * the device belonging to the pass-through guest is not yet active. In
- * this case the guest may not pick up the interrupt (e.g., masked at the
- * PIC) and we need to detect that.
- */
set_bit(mirq, dpci->dirq_mask);
- if ( pt_irq_need_timer(dpci->mirq[mirq].flags) )
- set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
- NOW() + PT_IRQ_TIME_OUT);
- vcpu_kick(d->vcpu[0]);
-
+ tasklet_schedule(&dpci->dirq_tasklet);
return 1;
}
}
#endif
-void hvm_dirq_assist(struct vcpu *v)
+static void hvm_dirq_assist(unsigned long _d)
{
unsigned int irq;
uint32_t device, intx;
- struct domain *d = v->domain;
+ struct domain *d = (struct domain *)_d;
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
struct dev_intx_gsi_link *digl;
- if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
- return;
+ ASSERT(hvm_irq_dpci);
for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
irq < d->nr_pirqs;
continue;
}
#endif
- if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
- stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
-
list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
{
device = digl->device;
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
+        /* Ensure the dirq tasklet is idle before tearing down dpci state. */
+        tasklet_kill(&hvm_irq_dpci->dirq_tasklet);
+
for ( i = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
i < d->nr_pirqs;
i = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, i + 1) )
#include <xen/types.h>
#include <xen/spinlock.h>
+#include <xen/softirq.h>
#include <asm/irq.h>
#include <public/hvm/save.h>
/* Record of mapped Links */
uint8_t link_cnt[NR_LINK];
struct timer hvm_timer[NR_VECTORS];
+ struct tasklet dirq_tasklet;
};
/* Modify state of a PCI INTx wire. */
void hvm_assert_evtchn_irq(struct vcpu *v);
void hvm_set_callback_via(struct domain *d, uint64_t via);
-void hvm_dirq_assist(struct vcpu *v);
-
#endif /* __XEN_HVM_IRQ_H__ */
void open_softirq(int nr, softirq_handler handler);
void softirq_init(void);
-static inline void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
-{
- int cpu;
-
- for_each_cpu_mask(cpu, mask)
- {
- if ( test_and_set_bit(nr, &softirq_pending(cpu)) )
- cpu_clear(cpu, mask);
- }
-
- smp_send_event_check_mask(&mask);
-}
-
-static inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
-{
- if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
- smp_send_event_check_cpu(cpu);
-}
-
-static inline void raise_softirq(unsigned int nr)
-{
- set_bit(nr, &softirq_pending(smp_processor_id()));
-}
+/* Raise-softirq primitives, uninlined; definitions live in the softirq C file. */
+void cpumask_raise_softirq(cpumask_t mask, unsigned int nr);
+void cpu_raise_softirq(unsigned int cpu, unsigned int nr);
+void raise_softirq(unsigned int nr);
/*
* TASKLETS -- dynamically-allocatable tasks run in softirq context